library(readxl)
library(ggplot2)
library(dplyr)
library(psych)
library(lavaan)
library(tidyr)
library(corrplot)
library(semPlot)
library(semTools)
library(knitr)
library(Hmisc)
library(MVN)
library(PMCMR)
library(PMCMRplus)
library(Rmisc)
library(jtools)
library(apaTables)
library(paran)
library(GPArotation)
library(stringr)
# Global setup: suppress warnings to keep notebook output clean.
# NOTE(review): the original also called options(err = -1); "err" is not a
# valid R option (see ?options) and the call was a silent no-op, so it has
# been removed.
options(warn = -1)
options(repr.plot.width = 10, repr.plot.height = 5)
# NOTE(review): an absolute setwd() makes the script non-portable; consider
# relative paths or here::here().
setwd('/Users/vladankusev/Desktop/nis')
data <- read_xlsx('kursach_finale.xlsx')
# Coerce all 158 survey item columns to numeric (read_xlsx may import some
# of them as text).
data[1:158] <- lapply(data[1:158], as.numeric)
# Careless-responder screening: a respondent whose answers within a scale
# have (rounded) SD == 0 gave the same answer to every item of that scale,
# so the row is dropped.
# BUG FIX(review): the original deleted rows by hard-coded indices copied
# from the interactive which() output; filtering on the condition itself
# removes the same rows but keeps working if the input file changes.

# Main apology scale (items 1-100)
data$SD_main_scale <- round(apply(data[1:100], 1, sd), 1)
data <- data[data$SD_main_scale != 0, ]
# Full self-construal scale (items 101-135)
data$SD_self_concept <- round(apply(data[101:135], 1, sd), 1)
data <- data[data$SD_self_concept != 0, ]
# Relational self-construal subscale (items 101-111)
data$SD_self_relative <- round(apply(data[101:111], 1, sd), 1)
data <- data[data$SD_self_relative != 0, ]
# Collective self-construal subscale (items 112-123)
data$SD_self_concept_collective <- round(apply(data[112:123], 1, sd), 1)
data <- data[data$SD_self_concept_collective != 0, ]
# Independent self-construal subscale (items 124-135)
data$SD_self_concept_autonomy <- round(apply(data[124:135], 1, sd), 1)
data <- data[data$SD_self_concept_autonomy != 0, ]
# Full Deci & Ryan scale (items 137-157)
data$SD_deci_ryan <- round(apply(data[137:157], 1, sd), 1)
data <- data[data$SD_deci_ryan != 0, ]
# Autonomy subscale (columns matched by name)
data$SD_deci_ryan_autonomy <- round(apply(data[, grepl("Autonomy", names(data))], 1, sd), 1)
data <- data[data$SD_deci_ryan_autonomy != 0, ]
# Relatedness subscale
data$SD_deci_ryan_relatedness <- round(apply(data[, grepl("Relatedness", names(data))], 1, sd), 1)
data <- data[data$SD_deci_ryan_relatedness != 0, ]
# Competence subscale
data$SD_deci_ryan_competence <- round(apply(data[, grepl("Competence", names(data))], 1, sd), 1)
data <- data[data$SD_deci_ryan_competence != 0, ]
# Raw response counts for the Sex item (still free text at this point)
table(data$Sex)
Да Женщина Мужской
1 265 1
Мужчина Предпочитаю не указывать
252 1
Удалим Да и Предпочитаю не указывать
# Drop the two junk Sex answers, merge the synonym "Мужской" into
# "Мужчина", store Sex as a factor, and show the percentage breakdown.
junk_rows <- data$Sex == "Да" | data$Sex == "Предпочитаю не указывать"
data <- data[!junk_rows, ]
data$Sex[data$Sex == "Мужской"] <- "Мужчина"
data$Sex <- as.factor(data$Sex)
round(100 * table(data$Sex) / nrow(data), 2)
Женщина Мужчина 51.16 48.84
Вручную перекодируем названия городов в категории по численности населения...
# Recode the free-text Town answers into three population-size categories:
# "Миллионик" (million-plus cities), "Столица" (Moscow/St. Petersburg) and
# "Меньше миллиона" (everything smaller).
# NOTE(review): rewritten from ~130 one-off replacement lines into lookup
# vectors. The duplicated "Таганрог" rule and the two no-op
# self-assignments ("Меньше миллиона", "Миллионик") were dropped; the
# recoded set of values is otherwise unchanged.
# Rows that answered just "Город" carry no information and are removed.
data <- data[!(data$Town == "Город"), ]
million_cities <- c(
  "Вогоград", "Волгоград", "Воронежское", "воронеж",
  "Другой город-миллионник (Новосибирск, Екатеринбург, Нижний-Новгород, Казань, Челябинск, Омск, Самара, Ростов-на-Дону, Уфа, Красноярск, Пермь, Воронеж, Волгоград, Краснодар)",
  "Екатеринбург", "Казань", "Красноярск", "Новосибирск", "Пермь",
  "Ростов-на-Дону", "Самара", "Челябинск"
)
small_cities <- c(
  "Армавир", "Астрахань", "Ачикулак", "Балашиха", "Белгород", "Белянино",
  "Березники", "Братск", "Брянск", "Владивосток", "Владимир", "Волхов",
  "Воркута", "Ворсма", "Гагарин", "Геленджик",
  "Город с населением менее 100 тысяч человек",
  "Город с населением от 100 до 250 тысяч человек",
  "Город с населением от 250 до 500 тысяч человек",
  "Город с населением от 500 тысяч до 1 млн человек",
  "Дзержинск", "Евпатория", "Заволжье", "Зеленоград", "Ивановка",
  "Иваново", "Ивантеевка", "Ижевск", "Ильинка", "Инза", "Иркутск",
  "Искитим", "Йошкар-ола", "Калининград", "Калуга", "Кемерово", "Керчь",
  "Кириши", "Кисловодск", "Королев", "Котельники", "Красный Кут",
  "Красный сулин", "Кропоткин", "Кунгур", "Курск", "Ленинск-Кузнецкий",
  "Люберцы", "Люберцы Московская область", "Магнитогорск", "Майкоп",
  "Менделеевск", "Мурмансе", "Мытищи", "Мытищи МО", "Невинномысск",
  "Нефтекамск", "Оренбург", "Орехово-Зуево", "Павлово",
  "Павловский Посад", "Пенза", "Петрозаводск",
  "Поселок городского типа/деревня", "Подольск", "Псков", "Пятигорск",
  "Ржев", "Сальск", "Саранск", "Саратов", "Сасово", "Сафоново",
  "Симферополь", "Смоленск", "Снежинск", "Сокол", "Соликамск", "Сочи",
  "Ставрополь", "Сызрань", "Таганрог", "Тамбов", "Тверь", "Тольятти",
  "Троицк, Челябинская область.", "Трубичино", "Тула",
  "Усолье-Сибирское", "Феодосия", "Химки", "ЦФО", "Чебоксары", "Чехов",
  "Череповец", "Шамонино, Уфимский район", "Якшур-Бодья", "Ярцево",
  "бирюч", "г. Серов", "знаменск", "иглино", "йошкар-ола",
  "каменск-уральский", "кострома", "майкоп", "невинномысск", "оренбург",
  "оричи", "пгт. Новофедоровка", "рязань", "саратов", "село Безруково",
  "село Усть-Вымь", "станица Марьянская", "тула", "чайковский",
  "ярославль"
)
data$Town[data$Town %in% million_cities] <- "Миллионик"
data$Town[data$Town == "Москва / Петербург"] <- "Столица"
data$Town[data$Town %in% small_cities] <- "Меньше миллиона"
# Store Town as a factor and inspect the category counts
data$Town <- as.factor(data$Town)
table(data$Town)
Меньше миллиона Миллионик Столица
322 101 94
# Final sample size and descriptive statistics for Age (psych::describeBy)
nrow(data)
describeBy(data$Age)
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | |
| X1 | 1 | 517 | 38.04836 | 10.65034 | 36 | 37.42651 | 10.3782 | 18 | 84 | 66 | 0.6182716 | 0.2886835 | 0.4684016 |
# Convenience data sets: the 100 apology items plus one subset per
# subscale, selected by item-name prefix.
subscale <- function(df, pattern) df[, grepl(pattern, names(df))]
apology_data <- data[1:100]
sozalenie_data <- subscale(data, "Sozalenie")
resposibility_data <- subscale(data, "Resposibility")
raskayanie_data <- subscale(data, "Raskayanie")
repeat_data <- subscale(data, "Repeat")
forgive_data <- subscale(data, "Forgive")
# Sampling adequacy (Kaiser-Meyer-Olkin MSA) per subscale and overall,
# followed by Bartlett's test of sphericity, as factor-analysis
# preconditions.
# Regret subscale
round(KMO(sozalenie_data)$MSA,2)
# Responsibility subscale
round(KMO(resposibility_data)$MSA,2)
# Remorse subscale
round(KMO(raskayanie_data)$MSA,2)
# Non-repetition subscale
round(KMO(repeat_data)$MSA,2)
# Apology-acceptance subscale
round(KMO(forgive_data)$MSA,2)
# Full scale
round(KMO(apology_data)$MSA,2)
# Bartlett's test of sphericity
cortest.bartlett(apology_data)
R was not square, finding R from data
Получили: 12 факторов
paran(apology_data, cfa = TRUE)# Horn's parallel analysis (common-factor variant)
#12 factors retained (see output below)
Using eigendecomposition of correlation matrix.
Computing: 10% 20% 30% 40% 50% 60% 70% 80% 90% 100%
Results of Horn's Parallel Analysis for factor retention
3000 iterations, using the mean estimate
--------------------------------------------------
Factor Adjusted Unadjusted Estimated
Eigenvalue Eigenvalue Bias
--------------------------------------------------
1 33.028566 34.242100 1.213533
2 2.748664 3.896099 1.147434
3 1.210479 2.307669 1.097190
4 0.758059 1.811907 1.053847
5 0.592521 1.608152 1.015631
6 0.353187 1.333183 0.979996
7 0.373151 1.320713 0.947562
8 0.259448 1.176056 0.916608
9 0.161838 1.048752 0.886913
10 0.080767 0.939682 0.858914
11 0.081487 0.913702 0.832215
12 0.009600 0.815714 0.806113
--------------------------------------------------
Adjusted eigenvalues > 0 indicate dimensions to retain.
(12 factors retained)
Получили: 1 фактор
# VSS / MAP criteria for the number of factors.
# BUG FIX(review): n.obs must be the number of observations (rows);
# length() on a data frame returns the number of COLUMNS (here 100), which
# distorts every n.obs-dependent statistic (chi-square, BIC, RMSEA).
x <- VSS(apology_data, n = 5, rotate = 'oblimin',
         n.obs = nrow(apology_data))
# VSS results
print(x) #1 factor by VSS complexity 1
Very Simple Structure
Call: vss(x = x, n = n, rotate = rotate, diagonal = diagonal, fm = fm,
n.obs = n.obs, plot = plot, title = title, use = use, cor = cor)
VSS complexity 1 achieves a maximimum of 0.94 with 1 factors
VSS complexity 2 achieves a maximimum of 0.77 with 2 factors
The Velicer MAP achieves a minimum of 0 with 5 factors
BIC achieves a minimum of -18997.54 with 4 factors
Sample Size adjusted BIC achieves a minimum of -4798.05 with 5 factors
Statistics by number of factors
vss1 vss2 map dof chisq prob sqresid fit RMSEA BIC SABIC complex
1 0.94 0.00 0.0081 4850 12991 0.0e+00 79 0.94 0.057 -17312 -1917 1.0
2 0.49 0.77 0.0061 4751 11149 0.0e+00 299 0.77 0.051 -18536 -3455 1.3
3 0.40 0.69 0.0054 4653 10154 0.0e+00 325 0.75 0.048 -18918 -4149 1.6
4 0.35 0.57 0.0051 4556 9469 0.0e+00 396 0.69 0.046 -18998 -4536 1.9
5 0.19 0.44 0.0049 4460 8911 4.9e-299 501 0.61 0.044 -18955 -4798 2.2
eChisq SRMR eCRMS eBIC
1 18089 0.059 0.060 -12214
2 11247 0.047 0.048 -18437
3 8964 0.042 0.043 -20108
4 7626 0.039 0.040 -20840
5 6576 0.036 0.038 -21290
# Standalone VSS plot with a Russian title
VSS.plot(x, title = "Результаты критерия VSS")
# Re-suppress warnings (repeats the global setup at the top of the file).
# NOTE(review): "err" is not a valid R option; this call has no effect.
options(warn = -1)
options(err = -1)
Получили: ~18–20 факторов
# Eigenvalues of the item correlation matrix (Kaiser-style inspection)
ev <- eigen(cor(apology_data)) # get eigenvalues
ev$values[1:20]# ~18-20 eigenvalues above 1
# Mardia's multivariate normality test with Shapiro-Wilk per item.
# The output below rejects multivariate normality (p = 0), which motivates
# estimator = "WLSMV" in the CFA that follows.
mvn(data = apology_data ,mvnTest ="mardia",univariateTest = "SW")[1]# normality rejected -> use WLSMV
| Test | Statistic | p value | Result |
|---|---|---|---|
| <chr> | <fct> | <fct> | <chr> |
| Mardia Skewness | 332344.270274223 | 0 | NO |
| Mardia Kurtosis | 216.651162019779 | 0 | NO |
| MVN | NA | NA | NO |
Используем estimator = WLSMV
# Working copy of the item data for the CFA
factor <- apology_data
# Model 1 (full): five correlated latent factors, one per apology
# component, each loading on its 20 situation items.
# NOTE(review): Repeat_19 is absent from the Repeat factor below —
# presumably deliberate; confirm against the questionnaire.
Model_base <- "
Sozalenie =~ Sozalenie_1 + Sozalenie_2 + Sozalenie_3 + Sozalenie_4 + Sozalenie_5 + Sozalenie_6 +
Sozalenie_7 + Sozalenie_8 + Sozalenie_9 + Sozalenie_10 + Sozalenie_11 + Sozalenie_12 + Sozalenie_13 +
Sozalenie_14 + Sozalenie_15 + Sozalenie_16 + Sozalenie_17 + Sozalenie_18 + Sozalenie_19 + Sozalenie_20
Resposibility =~ Resposibility_1 + Resposibility_2 + Resposibility_3 + Resposibility_4 + Resposibility_5 +
Resposibility_6 + Resposibility_7 + Resposibility_8 + Resposibility_9 + Resposibility_10 + Resposibility_11 +
Resposibility_12 + Resposibility_13 + Resposibility_14 + Resposibility_15 + Resposibility_16 + Resposibility_17 +
Resposibility_18 + Resposibility_19 + Resposibility_20
Raskayanie =~ Raskayanie_1 + Raskayanie_2 + Raskayanie_3 + Raskayanie_4 + Raskayanie_5 + Raskayanie_6 +
Raskayanie_7 + Raskayanie_8 + Raskayanie_9 + Raskayanie_10 + Raskayanie_11 + Raskayanie_12 + Raskayanie_13 +
Raskayanie_14 + Raskayanie_15 + Raskayanie_16 + Raskayanie_17 + Raskayanie_18 + Raskayanie_19 + Raskayanie_20
Repeat =~ Repeat_1 + Repeat_2 + Repeat_3 + Repeat_4 + Repeat_5 + Repeat_6 + Repeat_7 + Repeat_8 + Repeat_9 +
Repeat_10 + Repeat_11 + Repeat_12 + Repeat_13 + Repeat_14 + Repeat_15 + Repeat_16 + Repeat_17 + Repeat_18 +
Repeat_20
Forgive =~ Forgive_1 + Forgive_2 + Forgive_3 + Forgive_4 + Forgive_5 + Forgive_6 + Forgive_7 + Forgive_8 +
Forgive_9 + Forgive_10 + Forgive_11 + Forgive_12 + Forgive_13 + Forgive_14 + Forgive_15 + Forgive_16 + Forgive_17 +
Forgive_18 + Forgive_19 + Forgive_20"
# Fit model 1: items are declared ordered (categorical) and the robust
# WLSMV estimator is used, matching the non-normality found above.
# The ordered vector is generated instead of typed out item by item; it is
# the same 100 names in the same order as before.
ordered_items <- c(paste0("Sozalenie_", 1:20),
                   paste0("Resposibility_", 1:20),
                   paste0("Raskayanie_", 1:20),
                   paste0("Repeat_", 1:20),
                   paste0("Forgive_", 1:20))
fit_Base <- cfa(Model_base, data = factor, missing = "listwise",
                estimator = 'WLSMV', ordered = ordered_items)
# Short output: scaled (robust) fit indices for model 1
fit_Base_scaled<- c("chisq.scaled", "df.scaled",
"cfi.scaled", "tli.scaled", "rmsea.scaled",
"rmsea.ci.upper.scaled", "rmsea.ci.lower.scaled","srmr","aic","bic")
round(fitMeasures(fit_Base, fit_Base_scaled),3)
# Model-based factor scores, one column per latent factor.
# NOTE(review): predict() was called five times on the same fitted model;
# computing the score matrix once is equivalent and avoids the rework.
factor_scores <- predict(fit_Base)
data$factor1_predict <- factor_scores[, 1]
data$factor2_predict <- factor_scores[, 2]
data$factor3_predict <- factor_scores[, 3]
data$factor4_predict <- factor_scores[, 4]
data$factor5_predict <- factor_scores[, 5]
# Raw sum score per factor, computed with rowSums over explicit item-name
# vectors instead of 20-term "+" chains.
# NOTE(review): Repeat_19 is excluded here as in the original sums —
# presumably to mirror the CFA model specification; confirm.
data$Factor1_sum <- rowSums(factor[paste0("Sozalenie_", 1:20)])
data$Factor2_sum <- rowSums(factor[paste0("Resposibility_", 1:20)])
data$Factor3_sum <- rowSums(factor[paste0("Raskayanie_", 1:20)])
data$Factor4_sum <- rowSums(factor[paste0("Repeat_", c(1:18, 20))])
data$Factor5_sum <- rowSums(factor[paste0("Forgive_", 1:20)])
# Correlation matrix: model-based factor scores vs raw sum scores.
# BUG FIX(review): this call crashed (see the traceback below in the
# notebook). rcorr()$P has NA on its diagonal, and corrplot() cannot build
# its significance overlay from a p.mat containing NA — that is the
# "differing number of rows: 100, 90" error. Zeroing the diagonal fixes it.
# The columns are also selected by name instead of hard-coded positions
# 171:180, which silently break whenever a column is added upstream.
score_cols <- c(paste0("factor", 1:5, "_predict"),
                paste0("Factor", 1:5, "_sum"))
score_mat <- as.matrix(data[, score_cols])
cor_matrix <- rcorr(score_mat)
p_value <- cor_matrix$P
diag(p_value) <- 0
corrplot(cor(score_mat), method = 'num', is.corr = TRUE,
         p.mat = p_value, sig.level = 0.05)
Error in data.frame(..., check.names = FALSE): аргументы подразумевают разные количества строк: 100, 90
Traceback:
1. corrplot(cor(data[, (171:180)]), method = "num", is.corr = TRUE,
. p.mat = p_value, sig.level = 0.05)
2. cbind(corrPos, pNew)
3. cbind(deparse.level, ...)
4. data.frame(..., check.names = FALSE)
5. stop(gettextf("arguments imply differing number of rows: %s",
. paste(unique(nrows), collapse = ", ")), domain = NA)
# Capture the full CFA summary as text and print only the slice holding
# the latent covariances (lines 161-177 of the summary output)
covarience_output = capture.output(summary(fit_Base, fit.measures = TRUE, standardized = TRUE))
print(covarience_output[161:177])
[1] "Covariances:" [2] " Estimate Std.Err z-value P(>|z|) Std.lv Std.all" [3] " Sozalenie ~~ " [4] " Resposibility 0.257 0.027 9.648 0.000 1.008 1.008" [5] " Raskayanie 0.210 0.023 9.117 0.000 0.820 0.820" [6] " Repeat 0.172 0.022 7.900 0.000 0.928 0.928" [7] " Forgive 0.250 0.027 9.382 0.000 0.940 0.940" [8] " Resposibility ~~ " [9] " Raskayanie 0.213 0.023 9.235 0.000 0.845 0.845" [10] " Repeat 0.175 0.022 7.946 0.000 0.959 0.959" [11] " Forgive 0.245 0.026 9.446 0.000 0.937 0.937" [12] " Raskayanie ~~ " [13] " Repeat 0.156 0.021 7.325 0.000 0.850 0.850" [14] " Forgive 0.227 0.024 9.529 0.000 0.862 0.862" [15] " Repeat ~~ " [16] " Forgive 0.167 0.021 8.002 0.000 0.874 0.874" [17] ""
# Scatterplot of model-based factor-1 scores against the raw sum score.
ggplot(data, aes(x = Factor1_sum, y = factor1_predict)) +
  geom_point() +
  labs(x = "Сумма баллов по шкале",
       y = "Значения пятифакторной модели",
       title = "Корреляция между значениями пятифакторной модели и суммой баллов по шкале") +
  theme(plot.title = element_text(hjust = 0.5))
Вывод: построить точечные графики с доверительным интервалом и выбрать те ситуации, которые имеют равномерное распределение по извинениям и снова провести CFA
# Reshape wide -> long: one row per respondent x item.
apology_long <- gather(apology_data, apology_type, values,
                       Sozalenie_1:Forgive_20, factor_key = TRUE)
# Add a situation label derived from each item's numeric suffix
# (Sozalenie_7 -> situation_7).
# BUG FIX(review): the original built this column by concatenating
# per-suffix blocks, which had two defects: situation_14 was sized with
# the "_13$" pattern (copy-paste typo at the old L445), and the
# concatenated blocks did not follow the row order gather() produces
# (rows are stacked column by column, the labels were stacked suffix by
# suffix). Deriving the label from each row's own item name is correct by
# construction.
apology_long_final <- apology_long
apology_long_final$situation_type <- paste0(
  "situation_",
  str_extract(as.character(apology_long_final$apology_type), "\\d+$")
)
# Store both grouping columns as factors, then compute means with
# confidence intervals per situation x apology-component cell
# (Rmisc::summarySE).
apology_long_final$situation_type <- as.factor(apology_long_final$situation_type)
apology_long_final$apology_type <- as.factor(apology_long_final$apology_type)
tgc <- summarySE(apology_long_final, measurevar="values", groupvars=c("situation_type","apology_type"))
# Split the CI table into chunks of (at most) three situations so each
# chunk fits on one faceted plot.
pick_situations <- function(tbl, ids) {
  subset(tbl, situation_type %in% paste0("situation_", ids))
}
tgc1 <- pick_situations(tgc, 1:3)
tgc2 <- pick_situations(tgc, 4:6)
tgc3 <- pick_situations(tgc, 7:9)
tgc4 <- pick_situations(tgc, 10:12)
tgc5 <- pick_situations(tgc, 13:15)
tgc6 <- pick_situations(tgc, 16:18)
tgc7 <- pick_situations(tgc, 19:20)
# Mean +- CI per apology component, faceted by situation, for each chunk.
# NOTE(review): the same ggplot code was pasted seven times; it is factored
# into one helper. The y-axis label is unified to "+- CI" — two of the
# seven copies read "+ CI", an apparent typo.
plot_situation_ci <- function(tbl) {
  ggplot(tbl, aes(x = apology_type, y = values)) +
    geom_errorbar(aes(ymin = values - ci, ymax = values + ci), width = .1) +
    geom_point() +
    facet_grid(cols = vars(situation_type)) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1)) +
    xlab("Тип извинений") +
    ylab("Средние значения +- CI")
}
plot_situation_ci(tgc1)
plot_situation_ci(tgc2)
plot_situation_ci(tgc3)
plot_situation_ci(tgc4)
plot_situation_ci(tgc5)
plot_situation_ci(tgc6)
plot_situation_ci(tgc7)
# Model 2: same five factors, restricted to the five situations
# (3, 10, 12, 16, 17) chosen from the CI plots above.
Model_base2 <- "
Sozalenie =~ Sozalenie_3 + Sozalenie_10 + Sozalenie_12 + Sozalenie_16 + Sozalenie_17
Resposibility =~ Resposibility_3 + Resposibility_10 + Resposibility_12 + Resposibility_16 + Resposibility_17
Raskayanie =~ Raskayanie_3 + Raskayanie_10 + Raskayanie_12 + Raskayanie_16 + Raskayanie_17
Repeat =~ Repeat_3 + Repeat_10 + Repeat_12 + Repeat_16 + Repeat_17
Forgive =~ Forgive_3 + Forgive_10 + Forgive_12 + Forgive_16 + Forgive_17"
# Same estimation choices as model 1: ordinal items + robust WLSMV.
# The ordered vector is generated (same 25 names, same order as before).
items2 <- paste0(rep(c("Sozalenie_", "Resposibility_", "Raskayanie_",
                       "Repeat_", "Forgive_"), each = 5),
                 c(3, 10, 12, 16, 17))
fit_Base2 <- cfa(Model_base2, data = factor, missing = "listwise",
                 estimator = 'WLSMV', ordered = items2)
# Short output: scaled (robust) fit indices for model 2
fit_Base_scaled2<- c("chisq.scaled", "df.scaled",
"cfi.scaled", "tli.scaled", "rmsea.scaled",
"rmsea.ci.upper.scaled", "rmsea.ci.lower.scaled","srmr","aic","bic")
round(fitMeasures(fit_Base2, fit_Base_scaled2),3)
# Detailed CFA results for model 2: print the latent-covariance slice of
# the captured summary text
covarience_output = capture.output(summary(fit_Base2, fit.measures = TRUE, standardized = TRUE))
print(covarience_output[87:103])
[1] "Covariances:" [2] " Estimate Std.Err z-value P(>|z|) Std.lv Std.all" [3] " Sozalenie ~~ " [4] " Resposibility 0.440 0.026 16.681 0.000 1.090 1.090" [5] " Raskayanie 0.367 0.025 14.821 0.000 0.874 0.874" [6] " Repeat 0.405 0.025 16.365 0.000 1.006 1.006" [7] " Forgive 0.392 0.025 15.470 0.000 0.908 0.908" [8] " Resposibility ~~ " [9] " Raskayanie 0.382 0.026 14.674 0.000 0.929 0.929" [10] " Repeat 0.380 0.025 15.055 0.000 0.965 0.965" [11] " Forgive 0.403 0.025 15.856 0.000 0.953 0.953" [12] " Raskayanie ~~ " [13] " Repeat 0.348 0.026 13.329 0.000 0.851 0.851" [14] " Forgive 0.372 0.026 14.501 0.000 0.847 0.847" [15] " Repeat ~~ " [16] " Forgive 0.332 0.024 13.734 0.000 0.790 0.790" [17] ""
# Reliability before the CFA: Cronbach's alpha per subscale and overall.
# std.alpha is the second column of psych::alpha()$total.
std_alpha <- function(rel) round(rel$total[2], 2)
# Regret subscale
reliab_sozalenie <- alpha(sozalenie_data, check.keys = TRUE)
std_alpha(reliab_sozalenie) #0.92
# Responsibility subscale
reliab_resposibility <- alpha(resposibility_data, check.keys = TRUE)
std_alpha(reliab_resposibility) #0.91
# Remorse subscale
reliab_raskayanie <- alpha(raskayanie_data, check.keys = TRUE)
std_alpha(reliab_raskayanie) #0.92
# Non-repetition subscale
reliab_repeat <- alpha(repeat_data, check.keys = TRUE)
std_alpha(reliab_repeat) #0.92
# Apology-acceptance subscale
reliab_forgive <- alpha(forgive_data, check.keys = TRUE)
std_alpha(reliab_forgive) #0.93
# Full scale
full_scale <- alpha(apology_data, check.keys = TRUE)
std_alpha(full_scale) #0.98
# Notebook output (std.alpha tables for the six alpha() calls above):
# | std.alpha |
# |---|
# | <dbl> |
# | 0.92 |
# | std.alpha |
# |---|
# | <dbl> |
# | 0.91 |
# | std.alpha |
# |---|
# | <dbl> |
# | 0.92 |
# | std.alpha |
# |---|
# | <dbl> |
# | 0.92 |
# | std.alpha |
# |---|
# | <dbl> |
# | 0.93 |
# | std.alpha |
# |---|
# | <dbl> |
# | 0.98 |
# Post-CFA reliability: pull each subscale's items out of `factor`
# by column-name pattern, then compute Cronbach's alpha per factor.
factor1 <- factor[grepl("Sozalenie", names(factor))]      # regret
factor2 <- factor[grepl("Resposibility", names(factor))]  # responsibility
factor3 <- factor[grepl("Raskayanie", names(factor))]     # remorse
factor4 <- factor[grepl("Repeat", names(factor))]         # non-repetition
factor5 <- factor[grepl("Forgive", names(factor))]        # forgiveness
# std.alpha for each factor (alpha()$total[2])
round(alpha(factor1)$total[2], 2)  # 0.92
round(alpha(factor2)$total[2], 2)  # 0.91
round(alpha(factor3)$total[2], 2)  # 0.92
round(alpha(factor4)$total[2], 2)  # 0.92
round(alpha(factor5)$total[2], 2)  # 0.93
# Notebook output (std.alpha tables for the five post-CFA factors):
# | std.alpha |
# |---|
# | <dbl> |
# | 0.92 |
# | std.alpha |
# |---|
# | <dbl> |
# | 0.91 |
# | std.alpha |
# |---|
# | <dbl> |
# | 0.92 |
# | std.alpha |
# |---|
# | <dbl> |
# | 0.92 |
# | std.alpha |
# |---|
# | <dbl> |
# | 0.93 |
# Split-half reliability of factor 1 (regret):
# psych::splitHalf reports lambda 4/6, Guttman coefficients and beta.
splitHalf(factor1)
# Notebook output: Split half reliabilities Call: splitHalf(r = factor1) Maximum split half reliability (lambda 4) = 0.95 Guttman lambda 6 = 0.93 Average split half reliability = 0.92 Guttman lambda 3 (alpha) = 0.92 Guttman lambda 2 = 0.93 Minimum split half reliability (beta) = 0.88 Average interitem r = 0.38 with median = 0.39
# Split-half reliability of factor 2 (responsibility)
splitHalf(factor2)
# Notebook output: Split half reliabilities Call: splitHalf(r = factor2) Maximum split half reliability (lambda 4) = 0.93 Guttman lambda 6 = 0.92 Average split half reliability = 0.91 Guttman lambda 3 (alpha) = 0.91 Guttman lambda 2 = 0.91 Minimum split half reliability (beta) = 0.86 Average interitem r = 0.34 with median = 0.34
# Split-half reliability of factor 3 (remorse)
splitHalf(factor3)
# Notebook output: Split half reliabilities Call: splitHalf(r = factor3) Maximum split half reliability (lambda 4) = 0.95 Guttman lambda 6 = 0.93 Average split half reliability = 0.92 Guttman lambda 3 (alpha) = 0.92 Guttman lambda 2 = 0.92 Minimum split half reliability (beta) = 0.87 Average interitem r = 0.36 with median = 0.36
# Split-half reliability of factor 4 (non-repetition).
# Fix: the original called splitHalf(factor3) again (copy-paste slip —
# the captured output even shows "Call: splitHalf(r = factor3)"),
# so factor 4 was never actually assessed.
splitHalf(factor4)
# Notebook output (note the call shows factor3, not factor4): Split half reliabilities Call: splitHalf(r = factor3) Maximum split half reliability (lambda 4) = 0.95 Guttman lambda 6 = 0.93 Average split half reliability = 0.92 Guttman lambda 3 (alpha) = 0.92 Guttman lambda 2 = 0.92 Minimum split half reliability (beta) = 0.86 Average interitem r = 0.36 with median = 0.36
# Split-half reliability of factor 5 (forgiveness)
splitHalf(factor5)
# Notebook output: Split half reliabilities Call: splitHalf(r = factor5) Maximum split half reliability (lambda 4) = 0.95 Guttman lambda 6 = 0.93 Average split half reliability = 0.93 Guttman lambda 3 (alpha) = 0.93 Guttman lambda 2 = 0.93 Minimum split half reliability (beta) = 0.88 Average interitem r = 0.39 with median = 0.39
# Hand-rolled split-half reliability: split the 100 main-scale items
# into the first and second 50, average each half per respondent,
# then correlate the two half-scale means.
apology_data_half_1 <- apology_data[1:50]
apology_data_half_2 <- apology_data[51:100]
apology_data_half_1$means_scale <- rowMeans(apology_data_half_1,
                                            na.rm = FALSE, dims = 1)
apology_data_half_2$means_scale <- rowMeans(apology_data_half_2,
                                            na.rm = FALSE, dims = 1)
# Spearman correlation between the two halves
round(cor(apology_data_half_1$means_scale,
          apology_data_half_2$means_scale,
          method = "spearman"), 2)
# Инвентирование шкал (reverse-scoring the negatively keyed items)
# Reverse-score the negatively keyed items of a 7-point scale
# (new value = 8 - old value), applied in place to the *_R columns.
reversed_items <- c("Relative_8_R", "Relative_9_R", "Competence_1_R",
                    "Autonomy_2_R", "Relatedness_2_R", "Relatedness_3_R",
                    "Competence_5_R", "Relatedness_6_R", "Relatedness_7_R",
                    "Competence_6_R", "Autonomy_7_R")
data[reversed_items] <- lapply(data[reversed_items], function(v) 8 - v)
# Подсчет шкал (computing the scale scores)
# Self-construal scale scores: simple sums of the component items
# (rowSums with the default na.rm = FALSE propagates NA like `+`).
# Column creation order is kept because later code drops columns by
# position.
data$Relative_self_int <- rowSums(data[c(
  paste0("Relative_", c(1:7, 10, 11)), "Relative_8_R", "Relative_9_R")])
data$Collective_self_int <- rowSums(data[paste0("Collective_", 1:12)])
data$Autonomy_self_int <- rowSums(data[paste0("Self_autonomy_", 1:12)])
# Deci & Ryan basic-needs scores (*_R columns are already reverse-scored)
data$Autonomy <- rowSums(data[c(
  "Autonomy_1", "Autonomy_2_R", "Autonomy_3", "Autonomy_4",
  "Autonomy_5", "Autonomy_6", "Autonomy_7_R")])
data$Competence <- rowSums(data[c(
  "Competence_1_R", "Competence_2", "Competence_3", "Competence_4",
  "Competence_5_R", "Competence_6_R")])
data$Relatedness <- rowSums(data[c(
  "Relatedness_1", "Relatedness_2_R", "Relatedness_3_R", "Relatedness_4",
  "Relatedness_5", "Relatedness_6_R", "Relatedness_7_R", "Relatedness_8")])
# Keep only the computed scale scores and demographic columns
final_data <- data[,-c(1:158, 162:170)]
# Spearman correlations (with p-values) among the 16 scale variables
correlation <- rcorr(as.matrix(final_data[4:19]), type = "spearman")
p_correlation <- correlation$P
# Fix: Hmisc::rcorr() leaves NA on the diagonal of $P, which makes
# corrplot()'s p.mat handling fail with "arguments imply differing
# number of rows: 256, 240" (256 cells vs 240 off-diagonal p-values).
# Diagonal r = 1 is trivially significant, so set its p to 0.
diag(p_correlation) <- 0
corrplot(correlation$r, method = "number", is.corr = TRUE,
         p.mat = p_correlation, sig.level = 0.05)
# Notebook output (error raised by the corrplot() call above):
# Error in data.frame(..., check.names = FALSE): arguments imply differing number of rows: 256, 240
# Traceback:
# 1. corrplot(correlation$r, method = "num", is.corr = TRUE, p.mat = p_correlation,
# .     sig.level = 0.05)
# 2. cbind(corrPos, pNew)
# 3. cbind(deparse.level, ...)
# 4. data.frame(..., check.names = FALSE)
# 5. stop(gettextf("arguments imply differing number of rows: %s",
# .     paste(unique(nrows), collapse = ", ")), domain = NA)
# Regression for the factor 1 sum score: full factorial of the three
# self-construal scales crossed with the three basic-needs scales.
model1_factor <- lm(Factor1_sum ~ (Relative_scale * Collective * Self_autonomy) *
                      (Autonomy * Competence * Relatedness),
                    data = final_data)
model_1_sum <- as.data.frame(summary(model1_factor)[4])
names(model_1_sum)[4] <- "p_value"
# Show only coefficients significant at the 5% level
model_1_sum[model_1_sum$p_value < 0.05, ]
# Regression for the factor 1 latent (predicted) score, same predictors.
model2_factor <- lm(factor1_predict ~ (Relative_scale * Collective * Self_autonomy) *
                      (Autonomy * Competence * Relatedness),
                    data = final_data)
model_2_sum <- as.data.frame(summary(model2_factor)[4])
names(model_2_sum)[4] <- "p_value"
# Show only coefficients significant at the 5% level
model_2_sum[model_2_sum$p_value < 0.05, ]
# Notebook output (both filters returned no significant rows):
# | coefficients.Estimate | coefficients.Std..Error | coefficients.t.value | p_value |
# |---|---|---|---|
# | <dbl> | <dbl> | <dbl> | <dbl> |
# | coefficients.Estimate | coefficients.Std..Error | coefficients.t.value | p_value |
# |---|---|---|---|
# | <dbl> | <dbl> | <dbl> | <dbl> |
# Regression for the factor 2 sum score, same predictors.
model3_factor <- lm(Factor2_sum ~ (Relative_scale * Collective * Self_autonomy) *
                      (Autonomy * Competence * Relatedness),
                    data = final_data)
model_3_sum <- as.data.frame(summary(model3_factor)[4])
names(model_3_sum)[4] <- "p_value"
# Show only coefficients significant at the 5% level
model_3_sum[model_3_sum$p_value < 0.05, ]
# Regression for the factor 2 latent (predicted) score, same predictors.
model4_factor <- lm(factor2_predict ~ (Relative_scale * Collective * Self_autonomy) *
                      (Autonomy * Competence * Relatedness),
                    data = final_data)
model_4_sum <- as.data.frame(summary(model4_factor)[4])
names(model_4_sum)[4] <- "p_value"
# Show only coefficients significant at the 5% level
model_4_sum[model_4_sum$p_value < 0.05, ]
# Notebook output (both filters returned no significant rows):
# | coefficients.Estimate | coefficients.Std..Error | coefficients.t.value | p_value |
# |---|---|---|---|
# | <dbl> | <dbl> | <dbl> | <dbl> |
# | coefficients.Estimate | coefficients.Std..Error | coefficients.t.value | p_value |
# |---|---|---|---|
# | <dbl> | <dbl> | <dbl> | <dbl> |
# Regression for the factor 3 sum score, same predictors.
# NOTE(review): this block reuses the names `model4_factor` and
# `model_4_sum`, silently overwriting the factor 2 latent-score model
# fitted just above — consider renaming (the numbering drifts from
# here on: model5 = factor 3 latent, model6 = factor 4 sum, ...).
model4_factor <- lm(Factor3_sum ~ (Relative_scale*Collective*Self_autonomy) *
(Autonomy * Competence * Relatedness), data = final_data)
model_4_sum <- summary(model4_factor)[4]
model_4_sum<- as.data.frame(model_4_sum)
names(model_4_sum)[4] = 'p_value'
# Show only coefficients significant at the 5% level
model_4_sum[model_4_sum['p_value'] < 0.05, ]
# Regression for the factor 3 latent (predicted) score, same predictors.
model5_factor <- lm(factor3_predict ~ (Relative_scale * Collective * Self_autonomy) *
                      (Autonomy * Competence * Relatedness),
                    data = final_data)
model_5_sum <- as.data.frame(summary(model5_factor)[4])
names(model_5_sum)[4] <- "p_value"
# Show only coefficients significant at the 5% level
model_5_sum[model_5_sum$p_value < 0.05, ]
# Notebook output (both filters returned no significant rows):
# | coefficients.Estimate | coefficients.Std..Error | coefficients.t.value | p_value |
# |---|---|---|---|
# | <dbl> | <dbl> | <dbl> | <dbl> |
# | coefficients.Estimate | coefficients.Std..Error | coefficients.t.value | p_value |
# |---|---|---|---|
# | <dbl> | <dbl> | <dbl> | <dbl> |
# Regression for the factor 4 sum score, same predictors.
model6_factor <- lm(Factor4_sum ~ (Relative_scale * Collective * Self_autonomy) *
                      (Autonomy * Competence * Relatedness),
                    data = final_data)
model_6_sum <- as.data.frame(summary(model6_factor)[4])
names(model_6_sum)[4] <- "p_value"
# Fix: the original filtered with model_3_sum's p-values (copy-paste
# slip), selecting rows of model_6_sum by the wrong model's significance.
model_6_sum[model_6_sum$p_value < 0.05, ]
# Regression for the factor 4 latent (predicted) score, same predictors.
# Fix: the original regressed factor2_predict here (copy-paste slip from
# the factor 2 section). Assumes factor4_predict was computed alongside
# factor1_predict/.../factor5_predict — TODO confirm against the code
# that builds final_data.
model7_factor <- lm(factor4_predict ~ (Relative_scale * Collective * Self_autonomy) *
                      (Autonomy * Competence * Relatedness),
                    data = final_data)
model_7_sum <- as.data.frame(summary(model7_factor)[4])
names(model_7_sum)[4] <- "p_value"
# Show only coefficients significant at the 5% level
model_7_sum[model_7_sum$p_value < 0.05, ]
# Notebook output (both filters returned no significant rows):
# | coefficients.Estimate | coefficients.Std..Error | coefficients.t.value | p_value |
# |---|---|---|---|
# | <dbl> | <dbl> | <dbl> | <dbl> |
# | coefficients.Estimate | coefficients.Std..Error | coefficients.t.value | p_value |
# |---|---|---|---|
# | <dbl> | <dbl> | <dbl> | <dbl> |
# Regression for the factor 5 sum score, same predictors.
model8_factor <- lm(Factor5_sum ~ (Relative_scale * Collective * Self_autonomy) *
                      (Autonomy * Competence * Relatedness),
                    data = final_data)
model_8_sum <- as.data.frame(summary(model8_factor)[4])
names(model_8_sum)[4] <- "p_value"
# Fix: the original filtered with model_3_sum's p-values (copy-paste
# slip), selecting rows of model_8_sum by the wrong model's significance.
model_8_sum[model_8_sum$p_value < 0.05, ]
# Regression for the factor 5 latent (predicted) score, same predictors.
model9_factor <- lm(factor5_predict ~ (Relative_scale * Collective * Self_autonomy) *
                      (Autonomy * Competence * Relatedness),
                    data = final_data)
model_9_sum <- as.data.frame(summary(model9_factor)[4])
names(model_9_sum)[4] <- "p_value"
# Show only coefficients significant at the 5% level
model_9_sum[model_9_sum$p_value < 0.05, ]
# Notebook output (both filters returned no significant rows):
# | coefficients.Estimate | coefficients.Std..Error | coefficients.t.value | p_value |
# |---|---|---|---|
# | <dbl> | <dbl> | <dbl> | <dbl> |
# | coefficients.Estimate | coefficients.Std..Error | coefficients.t.value | p_value |
# |---|---|---|---|
# | <dbl> | <dbl> | <dbl> | <dbl> |
# Measurement invariance across sex, step 1 (configural model):
# add the grouping variable, fit the base model per group with no
# equality constraints, and report robust (scaled) fit indices.
factor$Sex <- data$Sex
fit1 <- cfa(Model_base, factor, estimator = "MLR", group = "Sex")
fit.scaled <- c("chisq.scaled", "df.scaled",
                "cfi.scaled", "tli.scaled", "rmsea.scaled",
                "rmsea.ci.upper.scaled", "rmsea.ci.lower.scaled",
                "srmr", "aic", "bic")
round(fitMeasures(fit1, fit.scaled), 3)
# Measurement invariance across sex, step 2 (metric model):
# factor loadings constrained equal across groups.
fit2 <- cfa(Model_base, factor, estimator = "MLR", group = "Sex",
            group.equal = "loadings")
fit.scaled <- c("chisq.scaled", "df.scaled",
                "cfi.scaled", "tli.scaled", "rmsea.scaled",
                "rmsea.ci.upper.scaled", "rmsea.ci.lower.scaled",
                "srmr", "aic", "bic")
round(fitMeasures(fit2, fit.scaled), 3)
# Measurement invariance across town size: merge the capital category
# ("Столица") into the million-plus-city category ("Миллионик"), then
# fit the configural model grouped by town.
# NOTE: reuses the name `fit2` from the metric-invariance fit above.
factor$Town <- as.character(data$Town)
factor$Town[factor$Town == "Столица"] <- "Миллионик"
factor$Town <- as.factor(factor$Town)
fit2 <- cfa(Model_base, factor, estimator = "MLR", group = "Town")
fit.scaled <- c("chisq.scaled", "df.scaled",
                "cfi.scaled", "tli.scaled", "rmsea.scaled",
                "rmsea.ci.upper.scaled", "rmsea.ci.lower.scaled",
                "srmr", "aic", "bic")
round(fitMeasures(fit2, fit.scaled), 3)